/*
 * NOTE(review): this region is a diff-hunk fragment (patch markers '+'/'-'
 * retained, hunk headers stripped), not compilable C.  The hunks below span
 * several functions (alloc_page_type and, further down, what appears to be
 * free_page_type / put_page_type interiors); the context lines between hunks
 * are missing, so the bodies shown here are not contiguous.
 */
int alloc_page_type(struct page_info *page, unsigned long type)
{
+ struct domain *owner = page_get_owner(page);
+
+ /* A page table is dirtied when its type count becomes non-zero. */
+ if ( likely(owner != NULL) )
+ mark_dirty(owner, page_to_mfn(page));
+
switch ( type & PGT_type_mask )
{
case PGT_l1_page_table:
*/
this_cpu(percpu_mm_info).deferred_ops |= DOP_FLUSH_ALL_TLBS;
- if ( unlikely(shadow_mode_enabled(owner)
- && !shadow_lock_is_acquired(owner)) )
+ if ( unlikely(shadow_mode_enabled(owner)) )
{
+ /* A page table is dirtied when its type count becomes zero. */
+ mark_dirty(owner, page_to_mfn(page));
+
if ( shadow_mode_refcounts(owner) )
return;
nx &= ~PGT_validated;
}
- /* Record TLB information for flush later. */
- page->tlbflush_timestamp = tlbflush_current_time();
+ /*
+ * Record TLB information for flush later. We do not stamp page
+ * tables when running in shadow mode:
+ * 1. Pointless, since it's the shadow pt's which must be tracked.
+ * 2. Shadow mode reuses this field for shadowed page tables to
+ * store flags info -- we don't want to conflict with that.
+ */
+ if ( !shadow_mode_enabled(page_get_owner(page)) ||
+ ((nx & PGT_type_mask) == PGT_writable_page) )
+ page->tlbflush_timestamp = tlbflush_current_time();
}
}
while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
/*
 * NOTE(review): the deleted comment below explicitly said dirty-marking
 * must wait until the CMPXCHG has zeroed the type count, yet the
 * replacement hunks above mark dirty BEFORE the type count changes.
 * Presumably the log-dirty bitmap is allowed to be conservatively
 * over-marked -- confirm against the shadow log-dirty design before
 * accepting this reordering.
 */
-
- /*
- * A page table is dirtied when its type count becomes zero.
- * We cannot set the dirty flag earlier than this because we must wait
- * until the type count has been zeroed by the CMPXCHG above.
- */
- if ( unlikely((nx & (PGT_validated|PGT_count_mask)) == 0) )
- mark_dirty(page_get_owner(page), page_to_mfn(page));
}
/*
 * NOTE(review): two more detached diff hunks.  The first line below is a
 * truncated statement -- it reads a field and discards it; in the original
 * file it is presumably the initializer of a local cpumask (something like
 * "cpumask_t mask = ..."), since "mask" is used on the next line without a
 * visible declaration.  TODO: recover the missing start of this hunk.
 */
page_get_owner(page)->domain_dirty_cpumask;
tlbflush_filter(mask, page->tlbflush_timestamp);
- if ( unlikely(!cpus_empty(mask)) )
+ if ( unlikely(!cpus_empty(mask)) &&
+ /* Shadow mode: track only writable pages. */
+ (!shadow_mode_enabled(page_get_owner(page)) ||
+ ((nx & PGT_type_mask) == PGT_writable_page)) )
{
perfc_incrc(need_flush_tlb_flush);
flush_tlb_mask(mask);
/* Noone else is updating simultaneously. */
__set_bit(_PGT_validated, &page->u.inuse.type_info);
/*
 * NOTE(review): the hunk below deletes the post-validation mark_dirty;
 * per the hunks earlier in this patch, dirtying now happens before the
 * PTE modifications rather than after them -- the deleted comment's
 * safety argument ("any PTE modifications ... have now happened") no
 * longer applies.  Confirm this is intentional over-marking.
 */
-
- /*
- * A page table is dirtied when its type count becomes non-zero. It is
- * safe to mark dirty here because any PTE modifications in
- * alloc_page_type() have now happened.
- */
- mark_dirty(page_get_owner(page), page_to_mfn(page));
}
return 1;
/*
 * NOTE(review): detached hunk from an unpin path (drops the pin's type and
 * general references, then records the page as dirty).  The replacement
 * removes the shadow_lock/unlock pair AND the shadow_remove_all_shadows()
 * call -- mark_dirty() only logs the dirty bit.  Confirm that tearing down
 * the page's shadows on unpin is now handled elsewhere (or is no longer
 * required); otherwise stale shadows may survive the unpin.
 */
{
put_page_and_type(page);
put_page(page);
- if ( shadow_mode_enabled(d) )
- {
- shadow_lock(d);
- shadow_remove_all_shadows(v, _mfn(mfn));
- /* A page is dirtied when its pin status is cleared. */
- sh_mark_dirty(d, _mfn(mfn));
- shadow_unlock(d);
- }
+ /* A page is dirtied when its pin status is cleared. */
+ mark_dirty(d, mfn);
}
else
{
/*
 * NOTE(review): two hunks fused together.  shadow_promote() runs up to the
 * set_bit() line; the clear_bit() that immediately follows (same bit, no
 * intervening code) must belong to a second function -- presumably
 * shadow_demote() -- whose header was lost with the hunk separator.
 */
void shadow_promote(struct vcpu *v, mfn_t gmfn, u32 type)
{
struct page_info *page = mfn_to_page(gmfn);
- unsigned long type_info;
ASSERT(valid_mfn(gmfn));
/* We should never try to promote a gmfn that has writeable mappings */
ASSERT(shadow_remove_write_access(v, gmfn, 0, 0) == 0);
- // Is the page already shadowed?
+ /* Is the page already shadowed? */
if ( !test_and_set_bit(_PGC_page_table, &page->count_info) )
/*
 * NOTE(review): the deleted block held a type reference purely so that
 * shadow_flags (which overlays tlbflush_timestamp in a union) stayed
 * valid.  The replacement writes shadow_flags with no type ref; this is
 * only safe together with this patch's struct page_info hunk, which stops
 * stamping tlbflush_timestamp on shadowed page tables.  The two hunks
 * must land together.
 */
- {
- // No prior shadow exists...
-
- // Grab a type-ref. We don't really care if we are racing with another
- // vcpu or not, or even what kind of type we get; we just want the type
- // count to be > 0.
- //
- do {
- type_info = page->u.inuse.type_info &
- (PGT_type_mask | PGT_pae_xen_l2);
- } while ( !get_page_type(page, type_info) );
-
- // Now that the type ref is non-zero, we can safely use the
- // shadow_flags.
- //
page->shadow_flags = 0;
- }
ASSERT(!test_bit(type >> PGC_SH_type_shift, &page->shadow_flags));
set_bit(type >> PGC_SH_type_shift, &page->shadow_flags);
/* NOTE(review): start of the (headerless) demote-side hunk. */
clear_bit(type >> PGC_SH_type_shift, &page->shadow_flags);
if ( (page->shadow_flags & SHF_page_type_mask) == 0 )
- {
- // release the extra type ref
- put_page_type(page);
-
- // clear the is-a-page-table bit.
clear_bit(_PGC_page_table, &page->count_info);
- }
}
/*
 * NOTE(review): struct page_info hunk.  This is the documentation side of
 * the shadow_flags / tlbflush_timestamp union aliasing rule that the
 * shadow_promote and tlbflush-stamping hunks in this patch depend on; the
 * enclosing struct declaration is outside this fragment.
 */
/**************************************************************************/
} u;
union {
- /* Timestamp from 'TLB clock', used to reduce need for safety
- * flushes. Only valid on a) free pages, and b) guest pages with a
- * zero type count. */
+ /*
+ * Timestamp from 'TLB clock', used to avoid extra safety flushes.
+ * Only valid for: a) free pages, and b) pages with zero type count
+ * (except page table pages when the guest is in shadow mode).
+ */
u32 tlbflush_timestamp;
- /* Only used on guest pages with a shadow.
- * Guest pages with a shadow must have a non-zero type count, so this
- * does not conflict with the tlbflush timestamp. */
+ /*
+ * Guest pages with a shadow. This does not conflict with
+ * tlbflush_timestamp since page table pages are explicitly not
+ * tracked for TLB-flush avoidance when a guest runs in shadow mode.
+ */
u32 shadow_flags;
-
- // XXX -- we expect to add another field here, to be used for min/max
- // purposes, which is only used for shadow pages.
};
};
void sh_do_mark_dirty(struct domain *d, mfn_t gmfn);
/*
 * NOTE(review): the replacement drops both the "d == NULL" guard and the
 * shadow_lock_is_acquired() re-entrancy handling.  After this patch,
 * mark_dirty() (a) dereferences d unconditionally -- every caller must
 * guarantee a non-NULL domain (the alloc_page_type hunk adds exactly such
 * an "owner != NULL" check) -- and (b) always takes the shadow lock, so it
 * must never be called with that lock held; callers already holding it
 * must use sh_mark_dirty() below.  Audit all call sites for both rules.
 */
static inline void mark_dirty(struct domain *d, unsigned long gmfn)
{
- int caller_locked;
-
- if ( unlikely(d == NULL) || likely(!shadow_mode_log_dirty(d)) )
+ if ( likely(!shadow_mode_log_dirty(d)) )
return;
- caller_locked = shadow_lock_is_acquired(d);
- if ( !caller_locked )
- shadow_lock(d);
+ shadow_lock(d);
sh_do_mark_dirty(d, _mfn(gmfn));
- if ( !caller_locked )
- shadow_unlock(d);
+ shadow_unlock(d);
}
/* Internal version, for when the shadow lock is already held */
static inline void sh_mark_dirty(struct domain *d, mfn_t gmfn)
{
ASSERT(shadow_lock_is_acquired(d));
/*
 * NOTE(review): behavior-neutral hunk -- only adds the unlikely() branch
 * hint on the log-dirty test, consistent with mark_dirty()'s
 * likely(!shadow_mode_log_dirty(d)) fast path.
 */
- if ( shadow_mode_log_dirty(d) )
+ if ( unlikely(shadow_mode_log_dirty(d)) )
sh_do_mark_dirty(d, gmfn);
}